SVHN digit detection using SSD

In [1]:
import ipywidgets as widgets
import os
import sys
import skimage.io
import scipy
import json
In [2]:
# Paths to the ssd_keras library checkout and the SVHN "extra" dataset.
# NOTE(review): hardcoded machine-specific absolute paths — parameterize
# (e.g. via environment variables or a relative data root) before sharing.
lib_directory="C:\\Users\\Aashish\\Documents\\houseNumberOCR\\ssd_keras\\"
data_directory="C:\\Users\\Aashish\\Documents\\houseNumberOCR\\extra\\"
In [3]:
import cv2
from utils_ssd import *
import pandas as pd
from PIL import Image

from matplotlib import pyplot as plt

%matplotlib inline
%load_ext autoreload
%autoreload 2
Using TensorFlow backend.
C:\Users\Aashish\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint8 = np.dtype([("qint8", np.int8, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint16 = np.dtype([("qint16", np.int16, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint32 = np.dtype([("qint32", np.int32, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorflow\python\framework\dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  np_resource = np.dtype([("resource", np.ubyte, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorboard\compat\tensorflow_stub\dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint8 = np.dtype([("qint8", np.int8, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorboard\compat\tensorflow_stub\dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorboard\compat\tensorflow_stub\dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint16 = np.dtype([("qint16", np.int16, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorboard\compat\tensorflow_stub\dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorboard\compat\tensorflow_stub\dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  _np_qint32 = np.dtype([("qint32", np.int32, 1)])
C:\Users\Aashish\anaconda3\lib\site-packages\tensorboard\compat\tensorflow_stub\dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
  np_resource = np.dtype([("resource", np.ubyte, 1)])
In [4]:
# Make the ssd_keras checkout importable (utils_ssd and friends live there).
LIBRARY_DIR = os.path.abspath(lib_directory) 
sys.path.append(LIBRARY_DIR)

Convert the digitStruct.mat annotation file to CSV

In [5]:
def read_process_h5(filename):
    """Parse an SVHN ``digitStruct.mat`` annotation file (MATLAB v7.3 / HDF5).

    Parameters
    ----------
    filename : str
        Path to the ``digitStruct.mat`` file.

    Returns
    -------
    list of dict
        One dict per image with keys ``filename``, ``labels`` (ints),
        ``length`` (number of digits), and per-digit ``height``, ``left``,
        ``top``, ``width`` lists (values as stored in the .mat file).

    Only the first ``ds_size`` (10000) images are read.
    """

    def _deref(f, ds):
        """Dereference an array of HDF5 object references into a list of scalars.

        Images with a single digit store a bare scalar instead of a reference
        array; iterating it raises TypeError, in which case the scalar is
        wrapped in a one-element list (same fallback as the original
        per-field try/except blocks).
        """
        try:
            return [f[ref][:][0, 0] for ref in ds]
        except TypeError:
            return [ds]

    ds_size = 10000  # cap on the number of annotated images to parse
    data_list = []

    # Context manager ensures the HDF5 file is closed even on error
    # (the original left it open).
    with h5py.File(filename, 'r') as f:
        groups = list(f['digitStruct'].items())
        bbox_refs = np.array(groups[0][1]).squeeze()   # refs to per-image bbox groups
        name_refs = np.array(groups[1][1]).squeeze()   # refs to per-image name char arrays

        for bbox_ref, name_ref in zip(bbox_refs[:ds_size], name_refs[:ds_size]):
            # The image name is stored as an array of character codes.
            char_codes = np.array(f[name_ref]).squeeze()
            record = {'filename': ''.join(chr(c) for c in char_codes)}

            # Field order as used by the original indices:
            # items[0]=height, items[1]=label, items[2]=left, items[3]=top, items[4]=width
            items = list(f[bbox_ref].items())

            # Normalize labels to int in both branches (the original left
            # single-digit labels as raw arrays; downstream json_to_csv
            # casts to int anyway).
            record['labels'] = [int(v) for v in _deref(f, np.array(items[1][1]).squeeze())]
            record['length'] = len(record['labels'])
            record['height'] = _deref(f, np.array(items[0][1]).squeeze())
            record['left'] = _deref(f, np.array(items[2][1]).squeeze())
            record['top'] = _deref(f, np.array(items[3][1]).squeeze())
            record['width'] = _deref(f, np.array(items[4][1]).squeeze())

            data_list.append(record)

    return data_list

def json_to_csv(json, filename):
    """Flatten per-image annotation dicts into one CSV row per digit box.

    Parameters
    ----------
    json : list of dict
        Output of ``read_process_h5`` — each dict holds ``filename``,
        ``labels`` and per-digit ``left``/``top``/``width``/``height`` lists.
    filename : str
        Destination CSV path; columns are
        class_id, fname, xmin, xmax, ymin, ymax.
    """
    records = [
        {
            'class_id': int(entry['labels'][idx]),
            'fname': entry['filename'],
            'xmin': int(entry['left'][idx]),
            'xmax': int(entry['left'][idx] + entry['width'][idx]),
            'ymin': int(entry['top'][idx]),
            'ymax': int(entry['top'][idx] + entry['height'][idx]),
        }
        for entry in json
        for idx in range(len(entry['labels']))
    ]
    # dtype='str' matches the original: values are serialized as strings.
    pd.DataFrame(records, dtype='str').to_csv(filename, index=False)
In [6]:
# Parse the HDF5-based annotations and flatten them to one CSV row per digit box.
file_path = data_directory+'/digitStruct.mat'
p  = read_process_h5(file_path)
json_to_csv(p, data_directory+'/data.csv')
In [7]:
# Task name, label CSV path, and the column order of that CSV as fed to the
# SSD data pipeline.
task = 'svhn'
labels_path = f'{data_directory}data.csv'
input_format = ['class_id','image_name','xmin','xmax','ymin','ymax' ]
df = pd.read_csv(labels_path)

Bounding-box sanity check using a random image

In [8]:
def view_random(df):
    """Sanity-check the converted annotations: pick one image at random and
    draw its digit bounding boxes with class labels on top of the image.

    Parameters
    ----------
    df : pd.DataFrame
        Annotation table with columns fname, class_id, xmin, xmax, ymin, ymax.
    """
    fname = np.random.choice(df.fname)
    image = skimage.io.imread(f'{data_directory}/{fname}')

    plt.figure(figsize=(6,6))
    plt.imshow(image)
    axis = plt.gca()

    # One row per annotated digit in the chosen image.
    for _, row in df[df.fname == fname].iterrows():
        x0, y0 = row['xmin'], row['ymin']
        box_w = row['xmax'] - x0
        box_h = row['ymax'] - y0
        axis.add_patch(plt.Rectangle((x0, y0), box_w, box_h,
                                     color='blue', fill=False, linewidth=2))
        axis.text(x0, y0, row['class_id'], size='x-large', color='white',
                  bbox={'facecolor':'blue', 'alpha':1.0})
    plt.show()
In [9]:
# Visual spot-check of the converted annotations.
view_random(df)

Set configuration

In [10]:
class Configuration(Config):
    """SVHN-specific overrides of the base ssd_keras fine-tuning Config."""
    batch_size = 8                    # images per training batch
    dataset_folder = data_directory   # root folder with images + annotation CSVs
    task = task                       # 'svhn' (set in the cell above)
    labels_path = labels_path         # path to the flattened data.csv
    input_format = input_format       # column order of the annotation CSV

conf=Configuration()

# Single geometric transform: resize every image to the model's fixed input
# size (img_height/img_width come from the base Config).
resize = Resize(height=conf.img_height, width=conf.img_width)
trans = [resize]
In [11]:
# Build the fine-tuning pipeline: split the data (create_subset=True writes
# train/val/test CSVs — see the log below), initialize weights subsampled
# from a pretrained COCO SSD checkpoint, then construct the training-mode
# model and the encoder that turns ground-truth boxes into SSD targets.
learner = SSD_finetune(conf)
learner.get_data(create_subset=True)

weights_destination_path=learner.init_weights()

learner.get_model(mode='training', weights_path = weights_destination_path)
model = learner.model
learner.get_input_encoder()
ssd_input_encoder = learner.ssd_input_encoder
data loading and preperations
class_ids [ 0  1  2  3  4  5  6  7  8  9 10]  should be numeric
input format: ['class_id', 'image_name', 'xmin', 'xmax', 'ymin', 'ymax']
   class_id  fname  xmin  xmax  ymin  ymax
0         4  1.png    24    62    70   126
1         7  1.png    55    91    41    97
split to 6670  train files 1665  val files 1665  test files

loading weights
classes are: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
last layer indicies [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253]
Shape of the 'conv4_3_norm_mbox_conf' weights:

kernel:	 (3, 3, 512, 44)
bias:	 (44,)
WARNING:tensorflow:From C:\Users\Aashish\anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.

C:\Users\Aashish\Documents\houseNumberOCR\OCR-master\utils_ssd.py:159: H5pyDeprecationWarning: The default file mode will change to 'r' (read-only) in h5py 3.0. To suppress this warning, pass the mode you need to h5py.File(), or set the global default h5.get_config().default_file_mode, or set the environment variable H5PY_DEFAULT_READONLY=1. Available modes are: 'r', 'r+', 'w', 'w-'/'x', 'a'. See the docs for details.
  weights_destination_file = h5py.File(weights_destination_path)
C:\Users\Aashish\Documents\houseNumberOCR\OCR-master\utils_ssd.py:177: H5pyDeprecationWarning: dataset.value has been deprecated. Use dataset[()] instead.
  kernel = weights_source_file[name][name]['kernel:0'].value
C:\Users\Aashish\Documents\houseNumberOCR\OCR-master\utils_ssd.py:178: H5pyDeprecationWarning: dataset.value has been deprecated. Use dataset[()] instead.
  bias = weights_source_file[name][name]['bias:0'].value
tracking <tf.Variable 'conv4_3_norm/conv4_3_norm_gamma:0' shape=(512,) dtype=float32> gamma
Loading weights from C:\Users\Aashish\Documents\houseNumberOCR\VGG_coco_SSD_300x300_iter_400000.h5_subsampled_11_classes.h5
WARNING:tensorflow:From C:\Users\Aashish\Documents\houseNumberOCR\ssd_keras\keras_loss_function\keras_ssd_loss.py:95: The name tf.log is deprecated. Please use tf.math.log instead.

WARNING:tensorflow:From C:\Users\Aashish\Documents\houseNumberOCR\ssd_keras\keras_loss_function\keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
WARNING:tensorflow:From C:\Users\Aashish\Documents\houseNumberOCR\ssd_keras\keras_loss_function\keras_ssd_loss.py:74: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From C:\Users\Aashish\Documents\houseNumberOCR\ssd_keras\keras_loss_function\keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
created encoder with 10 classes
In [12]:
# Per-split annotation CSVs — presumably written by learner.get_data() above
# (the split sizes are printed there); verify the filenames if get_data changes.
train_annotation_file=f'{conf.dataset_folder}train_data.csv'
val_annotation_file=f'{conf.dataset_folder}validation_data.csv'
test_annotation_file=f'{conf.dataset_folder}test_data.csv'

Divide the data into train, validation, and test sets (train — 6670 images, validation — 1665 images, test — 1665 images)

In [13]:
# Batch generators for training and validation; the shared encoder converts
# each batch's ground-truth boxes into SSD training targets.
train_generator = learner.get_generator(conf.batch_size, trans=trans, anot_file=train_annotation_file,
                  encoder=ssd_input_encoder)

# val=True marks this generator as the validation stream.
val_generator = learner.get_generator(conf.batch_size, trans=trans, anot_file=val_annotation_file, 
                                      encoder=ssd_input_encoder,val=True)
Loaded 6670 images, with [<data_generator.object_detection_2d_geometric_ops.Resize object at 0x0000021EC8E41F48>] transformations
Loaded 1665 images, with [<data_generator.object_detection_2d_geometric_ops.Resize object at 0x0000021EC8E41F48>] transformations

Train the model

In [14]:
# Set up the optimizer and training callbacks (the `epsilon` deprecation
# warning below comes from a ReduceLROnPlateau callback configured inside).
learner.init_training()
C:\Users\Aashish\anaconda3\lib\site-packages\keras\callbacks\callbacks.py:998: UserWarning: `epsilon` argument is deprecated and will be removed, use `min_delta` instead.
  warnings.warn('`epsilon` argument is deprecated and '
In [15]:
# Fine-tune: 100 steps per epoch for 60 epochs; the best val_loss checkpoints
# are saved to ./logs (see the training log below).
# Fixed typo: `histroy` -> `history` (the misspelled name was never used later).
history = learner.train(train_generator, val_generator, steps=100, epochs=60)
WARNING:tensorflow:From C:\Users\Aashish\anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.

WARNING:tensorflow:From C:\Users\Aashish\anaconda3\lib\site-packages\keras\callbacks\tensorboard_v1.py:200: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.

WARNING:tensorflow:From C:\Users\Aashish\anaconda3\lib\site-packages\keras\callbacks\tensorboard_v1.py:203: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.

Epoch 1/60
100/100 [==============================] - 38s 378ms/step - loss: 19.6860 - val_loss: 8.1427
WARNING:tensorflow:From C:\Users\Aashish\anaconda3\lib\site-packages\keras\callbacks\tensorboard_v1.py:343: The name tf.Summary is deprecated. Please use tf.compat.v1.Summary instead.


Epoch 00001: val_loss improved from inf to 8.14272, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-01_loss-19.6860            _val_loss-8.1427.h5
Epoch 2/60
100/100 [==============================] - 32s 315ms/step - loss: 7.4114 - val_loss: 6.9974

Epoch 00002: val_loss improved from 8.14272 to 6.99742, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-02_loss-7.4114            _val_loss-6.9974.h5
Epoch 3/60
100/100 [==============================] - 32s 316ms/step - loss: 7.2096 - val_loss: 7.0728

Epoch 00003: val_loss did not improve from 6.99742
Epoch 4/60
100/100 [==============================] - 32s 317ms/step - loss: 7.0630 - val_loss: 6.7456

Epoch 00004: val_loss improved from 6.99742 to 6.74560, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-04_loss-7.0630            _val_loss-6.7456.h5
Epoch 5/60
100/100 [==============================] - 32s 317ms/step - loss: 6.8425 - val_loss: 6.6179

Epoch 00005: val_loss improved from 6.74560 to 6.61787, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-05_loss-6.8425            _val_loss-6.6179.h5
Epoch 6/60
100/100 [==============================] - 32s 317ms/step - loss: 6.7726 - val_loss: 6.5926

Epoch 00006: val_loss improved from 6.61787 to 6.59262, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-06_loss-6.7726            _val_loss-6.5926.h5
Epoch 7/60
100/100 [==============================] - 32s 317ms/step - loss: 6.5292 - val_loss: 6.4806

Epoch 00007: val_loss improved from 6.59262 to 6.48059, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-07_loss-6.5292            _val_loss-6.4806.h5
Epoch 8/60
100/100 [==============================] - 32s 317ms/step - loss: 6.4363 - val_loss: 6.4746

Epoch 00008: val_loss improved from 6.48059 to 6.47458, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-08_loss-6.4363            _val_loss-6.4746.h5
Epoch 9/60
100/100 [==============================] - 34s 335ms/step - loss: 6.3977 - val_loss: 6.3677

Epoch 00009: val_loss improved from 6.47458 to 6.36767, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-09_loss-6.3977            _val_loss-6.3677.h5
Epoch 10/60
100/100 [==============================] - 32s 317ms/step - loss: 6.3070 - val_loss: 6.9588

Epoch 00010: val_loss did not improve from 6.36767
Epoch 11/60
100/100 [==============================] - 32s 318ms/step - loss: 6.2589 - val_loss: 6.0050

Epoch 00011: val_loss improved from 6.36767 to 6.00496, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-11_loss-6.2589            _val_loss-6.0050.h5
Epoch 12/60
100/100 [==============================] - 32s 317ms/step - loss: 6.2421 - val_loss: 6.4340

Epoch 00012: val_loss did not improve from 6.00496
Epoch 13/60
100/100 [==============================] - 32s 317ms/step - loss: 6.1143 - val_loss: 5.7859

Epoch 00013: val_loss improved from 6.00496 to 5.78589, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-13_loss-6.1143            _val_loss-5.7859.h5
Epoch 14/60
100/100 [==============================] - 32s 316ms/step - loss: 5.9585 - val_loss: 5.2978

Epoch 00014: val_loss improved from 5.78589 to 5.29781, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-14_loss-5.9585            _val_loss-5.2978.h5
Epoch 15/60
100/100 [==============================] - 32s 318ms/step - loss: 5.7681 - val_loss: 5.7024

Epoch 00015: val_loss did not improve from 5.29781
Epoch 16/60
100/100 [==============================] - 32s 317ms/step - loss: 5.7088 - val_loss: 5.2130

Epoch 00016: val_loss improved from 5.29781 to 5.21298, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-16_loss-5.7088            _val_loss-5.2130.h5
Epoch 17/60
100/100 [==============================] - 32s 317ms/step - loss: 5.5353 - val_loss: 5.5184

Epoch 00017: val_loss did not improve from 5.21298
Epoch 18/60
100/100 [==============================] - 32s 317ms/step - loss: 5.4489 - val_loss: 5.6060

Epoch 00018: val_loss did not improve from 5.21298
Epoch 19/60
100/100 [==============================] - 32s 318ms/step - loss: 5.2386 - val_loss: 5.4959

Epoch 00019: val_loss did not improve from 5.21298
Epoch 20/60
100/100 [==============================] - 32s 317ms/step - loss: 5.0862 - val_loss: 5.2026

Epoch 00020: val_loss improved from 5.21298 to 5.20263, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-20_loss-5.0862            _val_loss-5.2026.h5
Epoch 21/60
100/100 [==============================] - 32s 317ms/step - loss: 5.1267 - val_loss: 5.1003

Epoch 00021: val_loss improved from 5.20263 to 5.10031, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-21_loss-5.1267            _val_loss-5.1003.h5
Epoch 22/60
100/100 [==============================] - 32s 317ms/step - loss: 5.0139 - val_loss: 4.7095

Epoch 00022: val_loss improved from 5.10031 to 4.70947, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-22_loss-5.0139            _val_loss-4.7095.h5
Epoch 23/60
100/100 [==============================] - 32s 317ms/step - loss: 4.8810 - val_loss: 4.8431

Epoch 00023: val_loss did not improve from 4.70947
Epoch 24/60
100/100 [==============================] - 32s 317ms/step - loss: 4.8825 - val_loss: 4.1017

Epoch 00024: val_loss improved from 4.70947 to 4.10173, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-24_loss-4.8825            _val_loss-4.1017.h5
Epoch 25/60
100/100 [==============================] - 32s 317ms/step - loss: 4.7655 - val_loss: 5.1662

Epoch 00025: val_loss did not improve from 4.10173
Epoch 26/60
100/100 [==============================] - 32s 317ms/step - loss: 4.5682 - val_loss: 4.7980

Epoch 00026: val_loss did not improve from 4.10173
Epoch 27/60
100/100 [==============================] - 32s 316ms/step - loss: 4.6400 - val_loss: 4.2414

Epoch 00027: val_loss did not improve from 4.10173
Epoch 28/60
100/100 [==============================] - 31s 314ms/step - loss: 4.6399 - val_loss: 3.9777

Epoch 00028: val_loss improved from 4.10173 to 3.97765, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-28_loss-4.6399            _val_loss-3.9777.h5
Epoch 29/60
100/100 [==============================] - 31s 313ms/step - loss: 4.5443 - val_loss: 4.1238

Epoch 00029: val_loss did not improve from 3.97765
Epoch 30/60
100/100 [==============================] - 31s 313ms/step - loss: 4.4414 - val_loss: 4.6977

Epoch 00030: val_loss did not improve from 3.97765
Epoch 31/60
100/100 [==============================] - 31s 313ms/step - loss: 4.4411 - val_loss: 4.1350

Epoch 00031: val_loss did not improve from 3.97765
Epoch 32/60
100/100 [==============================] - 31s 312ms/step - loss: 4.4459 - val_loss: 4.9099

Epoch 00032: val_loss did not improve from 3.97765
Epoch 33/60
100/100 [==============================] - 31s 313ms/step - loss: 4.5038 - val_loss: 4.3419

Epoch 00033: val_loss did not improve from 3.97765
Epoch 34/60
100/100 [==============================] - 31s 312ms/step - loss: 4.3673 - val_loss: 5.4613

Epoch 00034: val_loss did not improve from 3.97765
Epoch 35/60
100/100 [==============================] - 31s 313ms/step - loss: 4.1221 - val_loss: 4.5298

Epoch 00035: val_loss did not improve from 3.97765
Epoch 36/60
100/100 [==============================] - 31s 313ms/step - loss: 4.1125 - val_loss: 4.3997

Epoch 00036: val_loss did not improve from 3.97765
Epoch 37/60
100/100 [==============================] - 31s 313ms/step - loss: 4.1802 - val_loss: 4.6601

Epoch 00037: val_loss did not improve from 3.97765
Epoch 38/60
100/100 [==============================] - 31s 313ms/step - loss: 4.2539 - val_loss: 4.4327

Epoch 00038: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.

Epoch 00038: val_loss did not improve from 3.97765
Epoch 39/60
100/100 [==============================] - 31s 313ms/step - loss: 3.9382 - val_loss: 4.4302

Epoch 00039: val_loss did not improve from 3.97765
Epoch 40/60
100/100 [==============================] - 32s 316ms/step - loss: 3.8786 - val_loss: 3.6510

Epoch 00040: val_loss improved from 3.97765 to 3.65100, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-40_loss-3.8786            _val_loss-3.6510.h5
Epoch 41/60
100/100 [==============================] - 32s 317ms/step - loss: 3.8188 - val_loss: 3.9350

Epoch 00041: val_loss did not improve from 3.65100
Epoch 42/60
100/100 [==============================] - 32s 317ms/step - loss: 3.6725 - val_loss: 3.2012

Epoch 00042: val_loss improved from 3.65100 to 3.20118, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-42_loss-3.6741            _val_loss-3.2012.h5
Epoch 43/60
100/100 [==============================] - 32s 318ms/step - loss: 3.5633 - val_loss: 3.1142

Epoch 00043: val_loss improved from 3.20118 to 3.11416, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-43_loss-3.5633            _val_loss-3.1142.h5
Epoch 44/60
100/100 [==============================] - 32s 317ms/step - loss: 3.5942 - val_loss: 4.3055

Epoch 00044: val_loss did not improve from 3.11416
Epoch 45/60
100/100 [==============================] - 32s 318ms/step - loss: 3.6665 - val_loss: 3.7516

Epoch 00045: val_loss did not improve from 3.11416
Epoch 46/60
100/100 [==============================] - 32s 318ms/step - loss: 3.6799 - val_loss: 3.2500

Epoch 00046: val_loss did not improve from 3.11416
Epoch 47/60
100/100 [==============================] - 32s 318ms/step - loss: 3.6753 - val_loss: 3.0543

Epoch 00047: val_loss improved from 3.11416 to 3.05426, saving model to ./logs/ssd_20200427T0827_svhn_classes_10-47_loss-3.6753            _val_loss-3.0543.h5
Epoch 48/60
100/100 [==============================] - 32s 317ms/step - loss: 3.6850 - val_loss: 3.9944

Epoch 00048: val_loss did not improve from 3.05426
Epoch 49/60
100/100 [==============================] - 32s 318ms/step - loss: 3.6215 - val_loss: 3.1960

Epoch 00049: val_loss did not improve from 3.05426
Epoch 50/60
100/100 [==============================] - 32s 317ms/step - loss: 3.5717 - val_loss: 3.1408

Epoch 00050: val_loss did not improve from 3.05426
Epoch 51/60
100/100 [==============================] - 32s 317ms/step - loss: 3.3805 - val_loss: 3.3778

Epoch 00051: val_loss did not improve from 3.05426
Epoch 52/60
100/100 [==============================] - 32s 317ms/step - loss: 3.4553 - val_loss: 3.4489

Epoch 00052: val_loss did not improve from 3.05426
Epoch 53/60
100/100 [==============================] - 32s 317ms/step - loss: 3.4911 - val_loss: 3.4555

Epoch 00053: val_loss did not improve from 3.05426
Epoch 54/60
100/100 [==============================] - 32s 318ms/step - loss: 3.4917 - val_loss: 4.8517

Epoch 00054: val_loss did not improve from 3.05426
Epoch 55/60
100/100 [==============================] - 32s 317ms/step - loss: 3.4819 - val_loss: 4.5498

Epoch 00055: val_loss did not improve from 3.05426
Epoch 56/60
100/100 [==============================] - 32s 317ms/step - loss: 3.5312 - val_loss: 3.1520

Epoch 00056: val_loss did not improve from 3.05426
Epoch 57/60
100/100 [==============================] - 32s 317ms/step - loss: 3.3886 - val_loss: 3.3227

Epoch 00057: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.

Epoch 00057: val_loss did not improve from 3.05426
Epoch 58/60
100/100 [==============================] - 32s 317ms/step - loss: 3.4156 - val_loss: 3.7714

Epoch 00058: val_loss did not improve from 3.05426
Epoch 59/60
100/100 [==============================] - 32s 317ms/step - loss: 3.1376 - val_loss: 3.1214

Epoch 00059: val_loss did not improve from 3.05426
Epoch 60/60
100/100 [==============================] - 32s 318ms/step - loss: 3.0906 - val_loss: 3.3313

Epoch 00060: val_loss did not improve from 3.05426
In [16]:
from eval_utils.average_precision_evaluator import Evaluator

Evaluate for one image

In [17]:
# --- Qualitative check: run the trained model on one test image ---
print(learner.classes)
class_count = 10  # digit class count (matches "created encoder with 10 classes" above)
test_dataset = learner.prepare_ds(conf.dataset_folder, test_annotation_file)
test_generator = learner.get_generator(conf.batch_size, trans=trans, anot_file=test_annotation_file,
                  encoder=ssd_input_encoder)
# Pull one batch and predict on its first image only.
imgs,gt = next(test_generator)
y_pred = learner.model.predict(np.expand_dims(imgs[0],0))

# NOTE(review): this import belongs with the top-of-notebook imports.
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast

# Convert raw SSD output (centroid-encoded, normalized coordinates) into
# absolute per-box detections: keep boxes with confidence >= 0.5 after
# non-max suppression at IoU 0.45, at most 200 boxes.
y_pred_decoded = decode_detections(y_pred,
               confidence_thresh=0.5,
               iou_threshold=0.45,
               top_k=200,
               input_coords='centroids',
               normalize_coords=True,
               img_height=conf.img_height,
               img_width=conf.img_width)
plt.figure(figsize=(6,6))
plt.imshow(imgs[0])
print(y_pred_decoded[0])

current_axis = plt.gca()

# Each decoded row is unpacked as [class_id, confidence, xmin, ymin, xmax, ymax].
for box in y_pred_decoded[0]:
    class_id = box[0]
    confidence = box[1]
    xmin,ymin,xmax,ymax = box[2],box[3],box[4],box[5]

    # id2digit maps the numeric class id back to the digit it represents.
    label = '{}: {:.2f}'.format(learner.id2digit[class_id], confidence)
    current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='blue', fill=False, linewidth=2))  
    current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'blue', 'alpha':1.0})
plt.show()
[ 0  1  2  3  4  5  6  7  8  9 10]
Loaded 1665 images, with [<data_generator.object_detection_2d_geometric_ops.Resize object at 0x0000021EC8E41F48>] transformations
[[  1.           0.85796082 177.29539275  39.2108649  226.54039264
  268.69282722]
 [  6.           0.94036323 105.27054369  38.65730166 169.23594475
  264.29230571]]

Evaluate for a test dataset (1665 images)

In [18]:
# Evaluate mean average precision over the full 1665-image test split.
# NOTE(review): the positional args to ev(...) appear to be
# (img_height, img_width, batch_size) — confirm against Evaluator.__call__.
ev = Evaluator(learner.model,class_count, test_dataset,model_mode='training' )
meanAveragePrecision=ev(300,300,1,data_generator_mode='resize')
print()
print("Mean Average Precision",meanAveragePrecision)
Number of images in the evaluation dataset: 1665

Producing predictions batch-wise: 100%|██████████| 1665/1665 [00:39<00:00, 41.90it/s]
Matching predictions to ground truth, class 1/10.: 100%|██████████| 7955/7955 [00:00<00:00, 25082.76it/s]
Matching predictions to ground truth, class 2/10.: 100%|██████████| 4095/4095 [00:00<00:00, 27743.06it/s]
Matching predictions to ground truth, class 3/10.: 100%|██████████| 3586/3586 [00:00<00:00, 32987.63it/s]
Matching predictions to ground truth, class 4/10.: 100%|██████████| 3075/3075 [00:00<00:00, 33513.89it/s]
Matching predictions to ground truth, class 5/10.: 100%|██████████| 2736/2736 [00:00<00:00, 31532.83it/s]
Matching predictions to ground truth, class 6/10.: 100%|██████████| 2402/2402 [00:00<00:00, 35946.99it/s]
Matching predictions to ground truth, class 7/10.: 100%|██████████| 3317/3317 [00:00<00:00, 33259.08it/s]
Matching predictions to ground truth, class 8/10.: 100%|██████████| 1731/1731 [00:00<00:00, 35421.48it/s]
Matching predictions to ground truth, class 9/10.: 100%|██████████| 1876/1876 [00:00<00:00, 38388.80it/s]
Matching predictions to ground truth, class 10/10.: 100%|██████████| 2378/2378 [00:00<00:00, 29804.70it/s]
Computing precisions and recalls, class 1/10
Computing precisions and recalls, class 2/10
Computing precisions and recalls, class 3/10
Computing precisions and recalls, class 4/10
Computing precisions and recalls, class 5/10
Computing precisions and recalls, class 6/10
Computing precisions and recalls, class 7/10
Computing precisions and recalls, class 8/10
Computing precisions and recalls, class 9/10
Computing precisions and recalls, class 10/10
Computing average precision, class 1/10
Computing average precision, class 2/10
Computing average precision, class 3/10
Computing average precision, class 4/10
Computing average precision, class 5/10
Computing average precision, class 6/10
Computing average precision, class 7/10
Computing average precision, class 8/10
Computing average precision, class 9/10
Computing average precision, class 10/10

Mean Average Precision 0.7248858961435136
In [ ]: